A functioning backend stack bypassing Firebase and using an existing domain

This commit is contained in:
Ammar Qaffaf
2025-06-29 20:45:38 -04:00
parent 90ab291d83
commit d4d1ec817d
12 changed files with 1202 additions and 129 deletions

5
.gitignore vendored
View File

@@ -59,3 +59,8 @@ pids
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
config.dev
cdk.out
backend-cdk-new.out
web-cdk.out
backend-cdk.out

Dockerfile
View File

@@ -1,16 +1,28 @@
FROM node:20-alpine
FROM --platform=linux/amd64 node:20-alpine
# curl for health checks
RUN apk add --no-cache curl
WORKDIR /app
COPY package*.json ./
RUN npm install
RUN npm install -g @nestjs/cli
RUN npm install --production --ignore-scripts
COPY . .
RUN npm run build
EXPOSE 4000
RUN addgroup -g 1001 -S nodejs
RUN adduser -S nestjs -u 1001
CMD ["npm", "run", "start"]
RUN chown -R nestjs:nodejs /app
USER nestjs
EXPOSE 3000
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost:3000/health || exit 1
CMD ["npm", "run", "start:prod"]

16
cdk.context.json Normal file
View File

@@ -0,0 +1,16 @@
{
"availability-zones:account=426265406140:region=us-east-2": [
"us-east-2a",
"us-east-2b",
"us-east-2c"
],
"availability-zones:account=482311766496:region=us-east-2": [
"us-east-2a",
"us-east-2b",
"us-east-2c"
],
"hosted-zone:account=482311766496:domainName=syncrow.me:region=us-east-2": {
"Id": "/hostedzone/Z02085662NLJECF4DGJV3",
"Name": "syncrow.me."
}
}
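These entries are the lookup cache the CDK CLI writes when the stack resolves availability zones and calls route53.HostedZone.fromLookup for syncrow.me; committing the file keeps cdk synth deterministic and avoids repeating those account lookups on every run.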

58
cdk.json Normal file
View File

@@ -0,0 +1,58 @@
{
"app": "npx ts-node --prefer-ts-exts infrastructure/app.ts",
"watch": {
"include": [
"**"
],
"exclude": [
"README.md",
"cdk*.json",
"**/*.d.ts",
"**/*.js",
"tsconfig.json",
"package*.json",
"yarn.lock",
"node_modules",
"test"
]
},
"context": {
"@aws-cdk/aws-lambda:recognizeLayerVersion": true,
"@aws-cdk/core:checkSecretUsage": true,
"@aws-cdk/core:target-partitions": [
"aws",
"aws-cn"
],
"@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true,
"@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true,
"@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true,
"@aws-cdk/aws-iam:minimizePolicies": true,
"@aws-cdk/core:validateSnapshotRemovalPolicy": true,
"@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true,
"@aws-cdk/aws-s3:createDefaultLoggingPolicy": true,
"@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true,
"@aws-cdk/aws-apigateway:disableCloudWatchRole": true,
"@aws-cdk/core:enablePartitionLiterals": true,
"@aws-cdk/aws-events:eventsTargetQueueSameAccount": true,
"@aws-cdk/aws-iam:standardizedServicePrincipals": true,
"@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true,
"@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true,
"@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true,
"@aws-cdk/aws-route53-patters:useCertificate": true,
"@aws-cdk/customresources:installLatestAwsSdkDefault": false,
"@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true,
"@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true,
"@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true,
"@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true,
"@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true,
"@aws-cdk/aws-redshift:columnId": true,
"@aws-cdk/aws-stepfunctions-tasks:enableLogging": true,
"@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true,
"@aws-cdk/aws-apigateway:requestValidatorUniqueId": true,
"@aws-cdk/aws-kms:aliasNameRef": true,
"@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true,
"@aws-cdk/aws-ecs:removeDefaultDeploymentAlarm": true,
"@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true,
"@aws-cdk/aws-codepipeline-actions:useNewDefaultBranchForSourceAction": true
}
}

17
deploy.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
set -e
ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
REGION=${AWS_DEFAULT_REGION:-us-east-2}
echo "Deploying to account: $ACCOUNT_ID in region: $REGION"
aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com
docker build --platform=linux/amd64 -t syncrow-backend .
docker tag syncrow-backend:latest $ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com/syncrow-backend:latest
docker push $ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com/syncrow-backend:latest
npx cdk deploy SyncrowBackendStack --require-approval never
aws ecs update-service --cluster syncrow-backend-cluster --service $(aws ecs list-services --cluster syncrow-backend-cluster --query 'serviceArns[0]' --output text | cut -d'/' -f3) --force-new-deployment

15
infrastructure/app.ts Normal file
View File

@@ -0,0 +1,15 @@
#!/usr/bin/env node
import 'source-map-support/register';
import * as cdk from 'aws-cdk-lib';
import { BackendStack } from './stack';
const app = new cdk.App();
new BackendStack(app, 'SyncrowBackendStack', {
env: {
account: process.env.CDK_DEFAULT_ACCOUNT,
region: process.env.CDK_DEFAULT_REGION,
},
databaseName: 'syncrow',
certificateArn: app.node.tryGetContext('certificateArn'),
});
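Because certificateArn is read from CDK context rather than hard-coded, an existing wildcard certificate can be reused at deploy time, for example npx cdk deploy SyncrowBackendStack -c certificateArn=<certificate-arn> (placeholder value). When no context value is supplied, the stack falls back to requesting a new DNS-validated certificate for api.syncrow.me.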

330
infrastructure/stack.ts Normal file
View File

@@ -0,0 +1,330 @@
import * as cdk from 'aws-cdk-lib';
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as ecs from 'aws-cdk-lib/aws-ecs';
import * as ecsPatterns from 'aws-cdk-lib/aws-ecs-patterns';
import * as rds from 'aws-cdk-lib/aws-rds';
import * as ecr from 'aws-cdk-lib/aws-ecr';
import * as logs from 'aws-cdk-lib/aws-logs';
import * as elbv2 from 'aws-cdk-lib/aws-elasticloadbalancingv2';
import * as acm from 'aws-cdk-lib/aws-certificatemanager';
import * as route53 from 'aws-cdk-lib/aws-route53';
import { Construct } from 'constructs';
import * as dotenv from 'dotenv';
export interface BackendStackProps extends cdk.StackProps {
vpcId?: string;
databaseName?: string;
certificateArn?: string;
}
export class BackendStack extends cdk.Stack {
public readonly apiUrl: string;
public readonly databaseEndpoint: string;
public readonly vpc: ec2.IVpc;
constructor(scope: Construct, id: string, props?: BackendStackProps) {
super(scope, id, props);
// Load environment variables from .env file
dotenv.config({ path: '.env' });
// VPC - either use existing or create new
this.vpc = props?.vpcId
? ec2.Vpc.fromLookup(this, 'ExistingVpc', { vpcId: props.vpcId })
: new ec2.Vpc(this, 'SyncrowVpc', {
maxAzs: 2,
natGateways: 1,
subnetConfiguration: [
{
cidrMask: 24,
name: 'public',
subnetType: ec2.SubnetType.PUBLIC,
},
{
cidrMask: 24,
name: 'private',
subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS,
},
],
});
// Security Groups
const dbSecurityGroup = new ec2.SecurityGroup(this, 'DatabaseSecurityGroup', {
vpc: this.vpc,
description: 'Security group for RDS PostgreSQL',
allowAllOutbound: false,
});
const ecsSecurityGroup = new ec2.SecurityGroup(this, 'EcsSecurityGroup', {
vpc: this.vpc,
description: 'Security group for ECS Fargate service',
allowAllOutbound: true,
});
const albSecurityGroup = new ec2.SecurityGroup(this, 'AlbSecurityGroup', {
vpc: this.vpc,
description: 'Security group for Application Load Balancer',
allowAllOutbound: true,
});
// Allow ALB to connect to ECS
ecsSecurityGroup.addIngressRule(
albSecurityGroup,
ec2.Port.tcp(3000),
'Allow ALB to connect to ECS service'
);
// Allow ECS to connect to RDS
dbSecurityGroup.addIngressRule(
ecsSecurityGroup,
ec2.Port.tcp(5432),
'Allow ECS to connect to PostgreSQL'
);
// Allow HTTP/HTTPS traffic to ALB
albSecurityGroup.addIngressRule(
ec2.Peer.anyIpv4(),
ec2.Port.tcp(80),
'Allow HTTP traffic'
);
albSecurityGroup.addIngressRule(
ec2.Peer.anyIpv4(),
ec2.Port.tcp(443),
'Allow HTTPS traffic'
);
// RDS Aurora Serverless v2 PostgreSQL
const dbCluster = new rds.DatabaseCluster(this, 'SyncrowDatabase', {
engine: rds.DatabaseClusterEngine.auroraPostgres({
version: rds.AuroraPostgresEngineVersion.VER_15_4,
}),
vpc: this.vpc,
securityGroups: [dbSecurityGroup],
serverlessV2MinCapacity: 0.5,
serverlessV2MaxCapacity: 4,
writer: rds.ClusterInstance.serverlessV2('writer'),
defaultDatabaseName: props?.databaseName || 'syncrow',
credentials: rds.Credentials.fromGeneratedSecret('syncrowadmin', {
secretName: 'syncrow-db-credentials',
}),
removalPolicy: cdk.RemovalPolicy.DESTROY,
});
// ECR Repository for Docker images
const ecrRepository = new ecr.Repository(this, 'SyncrowBackendRepo', {
repositoryName: 'syncrow-backend',
removalPolicy: cdk.RemovalPolicy.DESTROY,
emptyOnDelete: true,
});
// ECS Cluster
const cluster = new ecs.Cluster(this, 'SyncrowCluster', {
vpc: this.vpc,
clusterName: 'syncrow-backend-cluster',
});
// CloudWatch Log Group
const logGroup = new logs.LogGroup(this, 'SyncrowBackendLogs', {
logGroupName: '/ecs/syncrow-backend',
retention: logs.RetentionDays.ONE_WEEK,
removalPolicy: cdk.RemovalPolicy.DESTROY,
});
// Use existing wildcard certificate or create new one
const apiCertificate = props?.certificateArn
? acm.Certificate.fromCertificateArn(this, 'ApiCertificate', props.certificateArn)
: new acm.Certificate(this, 'ApiCertificate', {
domainName: 'api.syncrow.me',
validation: acm.CertificateValidation.fromDns(),
});
// ECS Fargate Service with Application Load Balancer
const fargateService = new ecsPatterns.ApplicationLoadBalancedFargateService(this, 'SyncrowBackendService', {
cluster,
memoryLimitMiB: 1024,
cpu: 512,
desiredCount: 1,
domainName: 'api.syncrow.me',
domainZone: route53.HostedZone.fromLookup(this, 'SyncrowZone', {
domainName: 'syncrow.me',
}),
certificate: apiCertificate,
protocol: elbv2.ApplicationProtocol.HTTPS,
redirectHTTP: true,
taskImageOptions: {
image: ecs.ContainerImage.fromEcrRepository(ecrRepository, 'latest'),
containerPort: 3000,
environment: {
// App settings
NODE_ENV: process.env.NODE_ENV || 'production',
PORT: process.env.PORT || '3000',
BASE_URL: process.env.BASE_URL || '',
// Database connection (CDK provides these automatically)
AZURE_POSTGRESQL_HOST: dbCluster.clusterEndpoint.hostname,
AZURE_POSTGRESQL_PORT: '5432',
AZURE_POSTGRESQL_DATABASE: props?.databaseName || 'syncrow',
AZURE_POSTGRESQL_USER: 'syncrowadmin',
AZURE_POSTGRESQL_SSL: process.env.AZURE_POSTGRESQL_SSL || 'false',
AZURE_POSTGRESQL_SYNC: process.env.AZURE_POSTGRESQL_SYNC || 'false',
// JWT Configuration - CRITICAL: These must be set
JWT_SECRET: process.env.JWT_SECRET || 'syncrow-jwt-secret-key-2025-production-environment-very-secure-random-string',
JWT_SECRET_REFRESH: process.env.JWT_SECRET_REFRESH || 'syncrow-refresh-secret-key-2025-production-environment-different-secure-string',
JWT_EXPIRE_TIME: process.env.JWT_EXPIRE_TIME || '1h',
JWT_EXPIRE_TIME_REFRESH: process.env.JWT_EXPIRE_TIME_REFRESH || '7d',
// Firebase Configuration
FIREBASE_API_KEY: process.env.FIREBASE_API_KEY || '',
FIREBASE_AUTH_DOMAIN: process.env.FIREBASE_AUTH_DOMAIN || '',
FIREBASE_PROJECT_ID: process.env.FIREBASE_PROJECT_ID || '',
FIREBASE_STORAGE_BUCKET: process.env.FIREBASE_STORAGE_BUCKET || '',
FIREBASE_MESSAGING_SENDER_ID: process.env.FIREBASE_MESSAGING_SENDER_ID || '',
FIREBASE_APP_ID: process.env.FIREBASE_APP_ID || '',
FIREBASE_MEASUREMENT_ID: process.env.FIREBASE_MEASUREMENT_ID || '',
FIREBASE_DATABASE_URL: process.env.FIREBASE_DATABASE_URL || '',
// Tuya IoT Configuration
TUYA_EU_URL: process.env.TUYA_EU_URL || 'https://openapi.tuyaeu.com',
TUYA_ACCESS_ID: process.env.TUYA_ACCESS_ID || '',
TUYA_ACCESS_KEY: process.env.TUYA_ACCESS_KEY || '',
TRUN_ON_TUYA_SOCKET: process.env.TRUN_ON_TUYA_SOCKET || '',
// Email Configuration
SMTP_HOST: process.env.SMTP_HOST || '',
SMTP_PORT: process.env.SMTP_PORT || '587',
SMTP_SECURE: process.env.SMTP_SECURE || 'true',
SMTP_USER: process.env.SMTP_USER || '',
SMTP_PASSWORD: process.env.SMTP_PASSWORD || '',
// Mailtrap Configuration
MAILTRAP_API_TOKEN: process.env.MAILTRAP_API_TOKEN || '',
MAILTRAP_INVITATION_TEMPLATE_UUID: process.env.MAILTRAP_INVITATION_TEMPLATE_UUID || '',
MAILTRAP_EDIT_USER_TEMPLATE_UUID: process.env.MAILTRAP_EDIT_USER_TEMPLATE_UUID || '',
MAILTRAP_DISABLE_TEMPLATE_UUID: process.env.MAILTRAP_DISABLE_TEMPLATE_UUID || '',
MAILTRAP_ENABLE_TEMPLATE_UUID: process.env.MAILTRAP_ENABLE_TEMPLATE_UUID || '',
MAILTRAP_DELETE_USER_TEMPLATE_UUID: process.env.MAILTRAP_DELETE_USER_TEMPLATE_UUID || '',
// OneSignal Push Notifications
ONESIGNAL_APP_ID: process.env.ONESIGNAL_APP_ID || '',
ONESIGNAL_API_KEY: process.env.ONESIGNAL_API_KEY || '',
// Admin Configuration
SUPER_ADMIN_EMAIL: process.env.SUPER_ADMIN_EMAIL || 'admin@yourdomain.com',
SUPER_ADMIN_PASSWORD: process.env.SUPER_ADMIN_PASSWORD || 'YourSecureAdminPassword123!',
// Google OAuth
GOOGLE_CLIENT_ID: process.env.GOOGLE_CLIENT_ID || '',
GOOGLE_CLIENT_SECRET: process.env.GOOGLE_CLIENT_SECRET || '',
// Other Configuration
OTP_LIMITER: process.env.OTP_LIMITER || '5',
SECRET_KEY: process.env.SECRET_KEY || 'another-random-secret-key-for-general-encryption',
ACCESS_KEY: process.env.ACCESS_KEY || '',
DB_SYNC: process.env.DB_SYNC || 'false',
// Redis (if used)
AZURE_REDIS_CONNECTIONSTRING: process.env.AZURE_REDIS_CONNECTIONSTRING || '',
// Docker Registry (for deployment)
DOCKER_REGISTRY_SERVER_URL: process.env.DOCKER_REGISTRY_SERVER_URL || '',
DOCKER_REGISTRY_SERVER_USERNAME: process.env.DOCKER_REGISTRY_SERVER_USERNAME || '',
DOCKER_REGISTRY_SERVER_PASSWORD: process.env.DOCKER_REGISTRY_SERVER_PASSWORD || '',
// Doppler (if used for secrets management)
DOPPLER_PROJECT: process.env.DOPPLER_PROJECT || '',
DOPPLER_CONFIG: process.env.DOPPLER_CONFIG || '',
DOPPLER_ENVIRONMENT: process.env.DOPPLER_ENVIRONMENT || '',
// Azure specific
WEBSITES_ENABLE_APP_SERVICE_STORAGE: process.env.WEBSITES_ENABLE_APP_SERVICE_STORAGE || 'false',
},
secrets: {
AZURE_POSTGRESQL_PASSWORD: ecs.Secret.fromSecretsManager(
dbCluster.secret!,
'password'
),
},
logDriver: ecs.LogDrivers.awsLogs({
streamPrefix: 'syncrow-backend',
logGroup,
}),
},
publicLoadBalancer: true,
securityGroups: [ecsSecurityGroup],
});
// Add security group to load balancer after creation
fargateService.loadBalancer.addSecurityGroup(albSecurityGroup);
// Configure health check
fargateService.targetGroup.configureHealthCheck({
path: '/health',
healthyHttpCodes: '200',
interval: cdk.Duration.seconds(30),
timeout: cdk.Duration.seconds(5),
healthyThresholdCount: 2,
unhealthyThresholdCount: 3,
});
// Auto Scaling
const scalableTarget = fargateService.service.autoScaleTaskCount({
minCapacity: 1,
maxCapacity: 10,
});
scalableTarget.scaleOnCpuUtilization('CpuScaling', {
targetUtilizationPercent: 70,
scaleInCooldown: cdk.Duration.minutes(5),
scaleOutCooldown: cdk.Duration.minutes(2),
});
scalableTarget.scaleOnMemoryUtilization('MemoryScaling', {
targetUtilizationPercent: 80,
scaleInCooldown: cdk.Duration.minutes(5),
scaleOutCooldown: cdk.Duration.minutes(2),
});
// The web app should call the HTTPS endpoint; api.syncrow.me is served with the
// ACM certificate configured above, and plain HTTP is redirected to HTTPS.
// Grant ECS task access to RDS credentials
if (dbCluster.secret) {
dbCluster.secret.grantRead(fargateService.taskDefinition.taskRole);
}
this.apiUrl = 'https://api.syncrow.me';
this.databaseEndpoint = dbCluster.clusterEndpoint.hostname;
// Outputs
new cdk.CfnOutput(this, 'ApiUrl', {
value: this.apiUrl,
description: 'Application Load Balancer URL',
exportName: `${this.stackName}-ApiUrl`,
});
new cdk.CfnOutput(this, 'DatabaseEndpoint', {
value: this.databaseEndpoint,
description: 'RDS Cluster Endpoint',
exportName: `${this.stackName}-DatabaseEndpoint`,
});
new cdk.CfnOutput(this, 'EcrRepositoryUri', {
value: ecrRepository.repositoryUri,
description: 'ECR Repository URI',
exportName: `${this.stackName}-EcrRepositoryUri`,
});
new cdk.CfnOutput(this, 'ClusterName', {
value: cluster.clusterName,
description: 'ECS Cluster Name',
exportName: `${this.stackName}-ClusterName`,
});
new cdk.CfnOutput(this, 'ServiceName', {
value: fargateService.service.serviceName,
description: 'ECS Service Name',
exportName: `${this.stackName}-ServiceName`,
});
}
}
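The task environment keeps the AZURE_POSTGRESQL_* variable names (presumably carried over from the earlier Azure deployment) so the application configuration does not need to change. A minimal sketch of the TypeORM options the app is assumed to build from these variables; the option mapping is illustrative, as the actual database config file is not part of this diff:

import { DataSourceOptions } from 'typeorm';

// Hypothetical mapping; the variable names match the ECS task environment above,
// and AZURE_POSTGRESQL_PASSWORD is injected from Secrets Manager by the stack.
export const buildDataSourceOptions = (): DataSourceOptions => ({
  type: 'postgres',
  host: process.env.AZURE_POSTGRESQL_HOST,
  port: parseInt(process.env.AZURE_POSTGRESQL_PORT ?? '5432', 10),
  username: process.env.AZURE_POSTGRESQL_USER,
  password: process.env.AZURE_POSTGRESQL_PASSWORD,
  database: process.env.AZURE_POSTGRESQL_DATABASE,
  ssl: process.env.AZURE_POSTGRESQL_SSL === 'true',
  synchronize: process.env.AZURE_POSTGRESQL_SYNC === 'true',
});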

View File

@@ -48,7 +48,12 @@ export class DeviceStatusFirebaseService {
});
// Initialize firebaseDb using firebaseDataBase function
try {
this.firebaseDb = firebaseDataBase(this.configService);
} catch (error) {
console.warn('Firebase initialization failed, continuing without Firebase:', error.message);
this.firebaseDb = null;
}
this.isDevEnv =
this.configService.get<string>('NODE_ENV') === 'development';
}
@@ -170,6 +175,14 @@ export class DeviceStatusFirebaseService {
async createDeviceStatusFirebase(
addDeviceStatusDto: AddDeviceStatusDto,
): Promise<any> {
// Check if Firebase is available
if (!this.firebaseDb) {
console.warn('Firebase not available, skipping Firebase operations');
// Still process the database logs but skip Firebase operations
await this.processDeviceStatusLogs(addDeviceStatusDto);
return { message: 'Device status processed without Firebase' };
}
const dataRef = ref(
this.firebaseDb,
`device-status/${addDeviceStatusDto.deviceUuid}`,
@@ -339,4 +352,127 @@ export class DeviceStatusFirebaseService {
const snapshot: DataSnapshot = await get(dataRef);
return snapshot.val();
}
private async processDeviceStatusLogs(addDeviceStatusDto: AddDeviceStatusDto): Promise<void> {
if (this.isDevEnv) {
// Save logs to your repository
const newLogs = addDeviceStatusDto.log.properties.map((property) => {
return this.deviceStatusLogRepository.create({
deviceId: addDeviceStatusDto.deviceUuid,
deviceTuyaId: addDeviceStatusDto.deviceTuyaUuid,
productId: addDeviceStatusDto.log.productId,
log: addDeviceStatusDto.log,
code: property.code,
value: property.value,
eventId: addDeviceStatusDto.log.dataId,
eventTime: new Date(property.time).toISOString(),
});
});
await this.deviceStatusLogRepository.save(newLogs);
if (addDeviceStatusDto.productType === ProductType.PC) {
const energyCodes = new Set([
PowerClampEnergyEnum.ENERGY_CONSUMED,
PowerClampEnergyEnum.ENERGY_CONSUMED_A,
PowerClampEnergyEnum.ENERGY_CONSUMED_B,
PowerClampEnergyEnum.ENERGY_CONSUMED_C,
]);
const energyStatus = addDeviceStatusDto?.log?.properties?.find(
(status) => energyCodes.has(status.code),
);
if (energyStatus) {
await this.powerClampService.updateEnergyConsumedHistoricalData(
addDeviceStatusDto.deviceUuid,
);
}
}
if (
addDeviceStatusDto.productType === ProductType.CPS ||
addDeviceStatusDto.productType === ProductType.WPS
) {
const occupancyCodes = new Set([PresenceSensorEnum.PRESENCE_STATE]);
const occupancyStatus = addDeviceStatusDto?.log?.properties?.find(
(status) => occupancyCodes.has(status.code),
);
if (occupancyStatus) {
await this.occupancyService.updateOccupancySensorHistoricalData(
addDeviceStatusDto.deviceUuid,
);
await this.occupancyService.updateOccupancySensorHistoricalDurationData(
addDeviceStatusDto.deviceUuid,
);
}
}
if (addDeviceStatusDto.productType === ProductType.AQI) {
await this.aqiDataService.updateAQISensorHistoricalData(
addDeviceStatusDto.deviceUuid,
);
}
} else {
// Save logs to your repository
const newLogs = addDeviceStatusDto?.status.map((property) => {
return this.deviceStatusLogRepository.create({
deviceId: addDeviceStatusDto.deviceUuid,
deviceTuyaId: addDeviceStatusDto.deviceTuyaUuid,
productId: addDeviceStatusDto.log.productKey,
log: addDeviceStatusDto.log,
code: property.code,
value: property.value,
eventId: addDeviceStatusDto.log.dataId,
eventTime: new Date(property.t).toISOString(),
});
});
await this.deviceStatusLogRepository.save(newLogs);
if (addDeviceStatusDto.productType === ProductType.PC) {
const energyCodes = new Set([
PowerClampEnergyEnum.ENERGY_CONSUMED,
PowerClampEnergyEnum.ENERGY_CONSUMED_A,
PowerClampEnergyEnum.ENERGY_CONSUMED_B,
PowerClampEnergyEnum.ENERGY_CONSUMED_C,
]);
const energyStatus = addDeviceStatusDto?.status?.find((status) => {
return energyCodes.has(status.code as PowerClampEnergyEnum);
});
if (energyStatus) {
await this.powerClampService.updateEnergyConsumedHistoricalData(
addDeviceStatusDto.deviceUuid,
);
}
}
if (
addDeviceStatusDto.productType === ProductType.CPS ||
addDeviceStatusDto.productType === ProductType.WPS
) {
const occupancyCodes = new Set([PresenceSensorEnum.PRESENCE_STATE]);
const occupancyStatus = addDeviceStatusDto?.status?.find((status) => {
return occupancyCodes.has(status.code as PresenceSensorEnum);
});
if (occupancyStatus) {
await this.occupancyService.updateOccupancySensorHistoricalData(
addDeviceStatusDto.deviceUuid,
);
await this.occupancyService.updateOccupancySensorHistoricalDurationData(
addDeviceStatusDto.deviceUuid,
);
}
}
if (addDeviceStatusDto.productType === ProductType.AQI) {
await this.aqiDataService.updateAQISensorHistoricalData(
addDeviceStatusDto.deviceUuid,
);
}
}
}
}

View File

@@ -3,6 +3,7 @@ import { getDatabase } from 'firebase/database';
import { ConfigService } from '@nestjs/config';
export const initializeFirebaseApp = (configService: ConfigService) => {
try {
const firebaseConfig = {
apiKey: configService.get<string>('FIREBASE_API_KEY'),
authDomain: configService.get<string>('FIREBASE_AUTH_DOMAIN'),
@@ -16,8 +17,18 @@ export const initializeFirebaseApp = (configService: ConfigService) => {
databaseURL: configService.get<string>('FIREBASE_DATABASE_URL'),
};
// Check if required Firebase config is available
if (!firebaseConfig.projectId || firebaseConfig.projectId === 'placeholder-project') {
console.warn('Firebase configuration not available, Firebase features will be disabled');
return null;
}
const app = initializeApp(firebaseConfig);
return getDatabase(app);
} catch (error) {
console.warn('Firebase initialization failed, Firebase features will be disabled:', error.message);
return null;
}
};
export const firebaseDataBase = (configService: ConfigService) =>

660
package-lock.json generated

File diff suppressed because it is too large

package.json
View File

@@ -6,19 +6,24 @@
"private": true,
"license": "UNLICENSED",
"scripts": {
"build": "npm run test && npx nest build",
"build": "npx nest build",
"build:lambda": "npx nest build && cp package*.json dist/",
"format": "prettier --write \"apps/**/*.ts\" \"libs/**/*.ts\"",
"start": "npm run test && node dist/main",
"start:dev": "npm run test && npx nest start --watch",
"start": "node dist/main",
"start:dev": "npx nest start --watch",
"dev": "npx nest start --watch",
"start:debug": "npm run test && npx nest start --debug --watch",
"start:prod": "npm run test && node dist/main",
"start:debug": "npx nest start --debug --watch",
"start:prod": "node dist/main",
"start:lambda": "node dist/lambda",
"lint": "eslint \"{src,apps,libs,test}/**/*.ts\" --fix",
"test": "jest --config jest.config.js",
"test:watch": "jest --watch --config jest.config.js",
"test:cov": "jest --coverage --config jest.config.js",
"test:debug": "node --inspect-brk -r tsconfig-paths/register -r ts-node/register node_modules/.bin/jest --runInBand",
"test:e2e": "jest --config ./apps/backend/test/jest-e2e.json"
"test:e2e": "jest --config ./apps/backend/test/jest-e2e.json",
"deploy": "./deploy.sh",
"infra:deploy": "cdk deploy SyncrowBackendStack",
"infra:destroy": "cdk destroy SyncrowBackendStack"
},
"dependencies": {
"@fast-csv/format": "^5.0.2",
@@ -36,13 +41,16 @@
"@nestjs/typeorm": "^10.0.2",
"@nestjs/websockets": "^10.3.8",
"@tuya/tuya-connector-nodejs": "^2.1.2",
"@types/aws-lambda": "^8.10.150",
"argon2": "^0.40.1",
"aws-serverless-express": "^3.4.0",
"axios": "^1.7.7",
"bcryptjs": "^2.4.3",
"class-transformer": "^0.5.1",
"class-validator": "^0.14.1",
"crypto-js": "^4.2.0",
"csv-parser": "^3.2.0",
"dotenv": "^17.0.0",
"express-rate-limit": "^7.1.5",
"firebase": "^10.12.5",
"google-auth-library": "^9.14.1",
@@ -52,11 +60,13 @@
"nest-winston": "^1.10.2",
"nodemailer": "^6.9.10",
"onesignal-node": "^3.4.0",
"passport": "^0.7.0",
"passport-jwt": "^4.0.1",
"pg": "^8.11.3",
"reflect-metadata": "^0.2.0",
"rxjs": "^7.8.1",
"typeorm": "^0.3.20",
"webpack": "^5.99.9",
"winston": "^3.17.0",
"ws": "^8.17.0"
},
@@ -73,7 +83,9 @@
"@types/supertest": "^6.0.0",
"@typescript-eslint/eslint-plugin": "^6.0.0",
"@typescript-eslint/parser": "^6.0.0",
"aws-cdk-lib": "^2.202.0",
"concurrently": "^8.2.2",
"constructs": "^10.4.2",
"eslint": "^8.42.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-import": "^2.31.0",

View File

@@ -57,7 +57,8 @@ async function bootstrap() {
logger.error('Seeding failed!', error.stack || error);
}
logger.log('Starting auth at port ...', process.env.PORT || 4000);
await app.listen(process.env.PORT || 4000);
const port = process.env.PORT || 3000;
logger.log(`Starting application on port ${port}...`);
await app.listen(port, '0.0.0.0');
}
bootstrap();
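Listening on 0.0.0.0 makes the server reachable from outside the container, which both the ALB target group and the Docker HEALTHCHECK rely on, and the new default port of 3000 matches the containerPort, EXPOSE, and PORT values introduced elsewhere in this commit.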