Lines matching refs: mdev_state — all references to mdev_state reported by the cross-referencer, apparently from the VFIO mediated-device sample driver (samples/vfio-mdev/mtty.c). The leading number on each entry is the source line; the trailing "in foo()" / "member" / "local" note records where the reference sits.

141 struct mdev_state;
146 struct mdev_state *mdev_state; member
153 struct mdev_state { struct
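The struct definition at line 153 is only named by the listing, and the member at line 146 is presumably the back-pointer kept inside the migration-file object (compare migf->mdev_state at line 837). The fields below are inferred from the references that follow; types, names and ordering are guesses wherever the listing does not show them:

        struct mdev_state {                             /* hedged reconstruction, not verbatim */
                struct vfio_device vdev;                /* container_of() target in the vfio ops */
                struct mdev_device *mdev;
                struct vfio_device_info dev_info;       /* cached by mtty_ioctl() */

                u8 *vconfig;                            /* emulated PCI config space */
                u32 bar_mask[2];                        /* size masks answered on BAR sizing writes */
                struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS]; /* type name assumed */
                int nr_ports;
                struct serial_port s[2];                /* per-port 16550 emulation state */
                struct mutex rxtx_lock;                 /* protects the per-port FIFOs */

                int irq_index;                          /* -1, VFIO_PCI_INTX_IRQ_INDEX or VFIO_PCI_MSI_IRQ_INDEX */
                struct eventfd_ctx *intx_evtfd, *msi_evtfd;
                bool intx_mask;
                struct mutex ops_lock;                  /* serializes mdev_access() and IRQ setup */

                enum vfio_device_mig_state state;       /* migration state machine */
                struct mutex state_mutex, reset_mutex;
                bool deferred_reset;
                struct mtty_migration_file *saving_migf, *resuming_migf;
        };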
216 static bool is_intx(struct mdev_state *mdev_state) in is_intx() argument
218 return mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX; in is_intx()
221 static bool is_msi(struct mdev_state *mdev_state) in is_msi() argument
223 return mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX; in is_msi()
226 static bool is_noirq(struct mdev_state *mdev_state) in is_noirq() argument
228 return !is_intx(mdev_state) && !is_msi(mdev_state); in is_noirq()
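Lines 216-228 contain essentially the complete bodies of the three IRQ-mode helpers; reassembled (only the braces are missing from the listing), they read:

        static bool is_intx(struct mdev_state *mdev_state)
        {
                return mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX;
        }

        static bool is_msi(struct mdev_state *mdev_state)
        {
                return mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX;
        }

        /* no eventfd registered yet, or interrupts torn down */
        static bool is_noirq(struct mdev_state *mdev_state)
        {
                return !is_intx(mdev_state) && !is_msi(mdev_state);
        }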
231 static void mtty_trigger_interrupt(struct mdev_state *mdev_state) in mtty_trigger_interrupt() argument
233 lockdep_assert_held(&mdev_state->ops_lock); in mtty_trigger_interrupt()
235 if (is_msi(mdev_state)) { in mtty_trigger_interrupt()
236 if (mdev_state->msi_evtfd) in mtty_trigger_interrupt()
237 eventfd_signal(mdev_state->msi_evtfd); in mtty_trigger_interrupt()
238 } else if (is_intx(mdev_state)) { in mtty_trigger_interrupt()
239 if (mdev_state->intx_evtfd && !mdev_state->intx_mask) { in mtty_trigger_interrupt()
240 eventfd_signal(mdev_state->intx_evtfd); in mtty_trigger_interrupt()
241 mdev_state->intx_mask = true; in mtty_trigger_interrupt()
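mtty_trigger_interrupt() (lines 231-241) must run under ops_lock and signals whichever eventfd is currently registered; for INTx it also latches intx_mask so the level-triggered interrupt is not re-signalled until user space unmasks it. Reassembled from the lines above (closing braces added):

        static void mtty_trigger_interrupt(struct mdev_state *mdev_state)
        {
                lockdep_assert_held(&mdev_state->ops_lock);

                if (is_msi(mdev_state)) {
                        if (mdev_state->msi_evtfd)
                                eventfd_signal(mdev_state->msi_evtfd);
                } else if (is_intx(mdev_state)) {
                        if (mdev_state->intx_evtfd && !mdev_state->intx_mask) {
                                eventfd_signal(mdev_state->intx_evtfd);
                                mdev_state->intx_mask = true;
                        }
                }
        }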
246 static void mtty_create_config_space(struct mdev_state *mdev_state) in mtty_create_config_space() argument
249 STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348); in mtty_create_config_space()
252 STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001); in mtty_create_config_space()
255 STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200); in mtty_create_config_space()
258 mdev_state->vconfig[0x8] = 0x10; in mtty_create_config_space()
261 mdev_state->vconfig[0x9] = 0x02; in mtty_create_config_space()
264 mdev_state->vconfig[0xa] = 0x00; in mtty_create_config_space()
267 mdev_state->vconfig[0xb] = 0x07; in mtty_create_config_space()
271 STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001); in mtty_create_config_space()
272 mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1; in mtty_create_config_space()
274 if (mdev_state->nr_ports == 2) { in mtty_create_config_space()
276 STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001); in mtty_create_config_space()
277 mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1; in mtty_create_config_space()
281 STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348); in mtty_create_config_space()
283 mdev_state->vconfig[0x34] = 0x00; /* Cap Ptr */ in mtty_create_config_space()
284 mdev_state->vconfig[0x3d] = 0x01; /* interrupt pin (INTA#) */ in mtty_create_config_space()
287 mdev_state->vconfig[0x40] = 0x23; in mtty_create_config_space()
288 mdev_state->vconfig[0x43] = 0x80; in mtty_create_config_space()
289 mdev_state->vconfig[0x44] = 0x23; in mtty_create_config_space()
290 mdev_state->vconfig[0x48] = 0x23; in mtty_create_config_space()
291 mdev_state->vconfig[0x4c] = 0x23; in mtty_create_config_space()
293 mdev_state->vconfig[0x60] = 0x50; in mtty_create_config_space()
294 mdev_state->vconfig[0x61] = 0x43; in mtty_create_config_space()
295 mdev_state->vconfig[0x62] = 0x49; in mtty_create_config_space()
296 mdev_state->vconfig[0x63] = 0x20; in mtty_create_config_space()
297 mdev_state->vconfig[0x64] = 0x53; in mtty_create_config_space()
298 mdev_state->vconfig[0x65] = 0x65; in mtty_create_config_space()
299 mdev_state->vconfig[0x66] = 0x72; in mtty_create_config_space()
300 mdev_state->vconfig[0x67] = 0x69; in mtty_create_config_space()
301 mdev_state->vconfig[0x68] = 0x61; in mtty_create_config_space()
302 mdev_state->vconfig[0x69] = 0x6c; in mtty_create_config_space()
303 mdev_state->vconfig[0x6a] = 0x2f; in mtty_create_config_space()
304 mdev_state->vconfig[0x6b] = 0x55; in mtty_create_config_space()
305 mdev_state->vconfig[0x6c] = 0x41; in mtty_create_config_space()
306 mdev_state->vconfig[0x6d] = 0x52; in mtty_create_config_space()
307 mdev_state->vconfig[0x6e] = 0x54; in mtty_create_config_space()
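Decoded against the standard PCI configuration-space layout, the constants written in mtty_create_config_space() (lines 246-307) describe a 16550-style serial controller: vendor/device ID 0x4348/0x3253 (repeated as subsystem IDs at 0x2c), class code 07/00/02 (simple communication controller, generic serial, 16550-compatible), a small I/O BAR per port sized by MTTY_IO_BAR_SIZE, interrupt pin INTA#, and ASCII bytes at 0x60-0x6e spelling "PCI Serial/UART". An annotated excerpt of the same writes:

        /* vendor 0x4348, device 0x3253 */
        STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);
        /* class code: prog-if 0x02 (16550), subclass 0x00 (serial), base class 0x07 */
        mdev_state->vconfig[0x9] = 0x02;
        mdev_state->vconfig[0xa] = 0x00;
        mdev_state->vconfig[0xb] = 0x07;
        /* BAR0: I/O space, base 0; sizing mask derived from MTTY_IO_BAR_SIZE */
        STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
        mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;
        /* 0x3d: interrupt pin INTA#; 0x60-0x6e: "PCI Serial/UART" */
        mdev_state->vconfig[0x3d] = 0x01;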
310 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, in handle_pci_cfg_write() argument
321 mdev_state->vconfig[0x3c] = buf[0]; in handle_pci_cfg_write()
336 if ((mdev_state->nr_ports == 1) && (bar_index == 1)) { in handle_pci_cfg_write()
337 STORE_LE32(&mdev_state->vconfig[offset], 0); in handle_pci_cfg_write()
345 bar_mask = mdev_state->bar_mask[bar_index]; in handle_pci_cfg_write()
349 cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul); in handle_pci_cfg_write()
350 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr); in handle_pci_cfg_write()
355 STORE_LE32(&mdev_state->vconfig[offset], 0); in handle_pci_cfg_write()
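The BAR cases in handle_pci_cfg_write() (lines 336-355) implement the usual PCI BAR sizing handshake: an all-ones write is answered with the size mask so the guest can derive the BAR size, any other value is stored back with the low flag bits preserved, and unimplemented BARs (BAR1 on a single-port device, the higher BARs at line 355) read back as zero. A hedged sketch of the BAR0/BAR1 branch (the 32-bit load of the written value is illustrative, not necessarily the driver's own helper):

        case 0x10:  /* BAR0 */
        case 0x14:  /* BAR1 */
                bar_index = (offset - 0x10) / 4;

                /* a single-port device hard-wires BAR1 to zero */
                if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
                        STORE_LE32(&mdev_state->vconfig[offset], 0);
                        break;
                }

                cfg_addr = le32_to_cpu(*(__le32 *)buf);   /* illustrative load */
                if (cfg_addr == 0xffffffff) {
                        /* sizing cycle: answer with the size mask */
                        bar_mask = mdev_state->bar_mask[bar_index];
                        cfg_addr = (cfg_addr & bar_mask);
                }

                /* preserve the I/O-space / type flag bits */
                cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
                STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
                break;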
364 static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state, in handle_bar_write() argument
373 if (mdev_state->s[index].dlab) { in handle_bar_write()
374 mdev_state->s[index].divisor |= data; in handle_bar_write()
378 mutex_lock(&mdev_state->rxtx_lock); in handle_bar_write()
381 if (mdev_state->s[index].rxtx.count < in handle_bar_write()
382 mdev_state->s[index].max_fifo_size) { in handle_bar_write()
383 mdev_state->s[index].rxtx.fifo[ in handle_bar_write()
384 mdev_state->s[index].rxtx.head] = data; in handle_bar_write()
385 mdev_state->s[index].rxtx.count++; in handle_bar_write()
386 CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head); in handle_bar_write()
387 mdev_state->s[index].overrun = false; in handle_bar_write()
393 if ((mdev_state->s[index].uart_reg[UART_IER] & in handle_bar_write()
395 (mdev_state->s[index].rxtx.count == in handle_bar_write()
396 mdev_state->s[index].intr_trigger_level)) { in handle_bar_write()
402 mtty_trigger_interrupt(mdev_state); in handle_bar_write()
408 mdev_state->s[index].overrun = true; in handle_bar_write()
414 if (mdev_state->s[index].uart_reg[UART_IER] & in handle_bar_write()
416 mtty_trigger_interrupt(mdev_state); in handle_bar_write()
418 mutex_unlock(&mdev_state->rxtx_lock); in handle_bar_write()
423 if (mdev_state->s[index].dlab) in handle_bar_write()
424 mdev_state->s[index].divisor |= (u16)data << 8; in handle_bar_write()
426 mdev_state->s[index].uart_reg[offset] = data; in handle_bar_write()
427 mutex_lock(&mdev_state->rxtx_lock); in handle_bar_write()
429 (mdev_state->s[index].rxtx.head == in handle_bar_write()
430 mdev_state->s[index].rxtx.tail)) { in handle_bar_write()
435 mtty_trigger_interrupt(mdev_state); in handle_bar_write()
438 mutex_unlock(&mdev_state->rxtx_lock); in handle_bar_write()
444 mdev_state->s[index].fcr = data; in handle_bar_write()
446 mutex_lock(&mdev_state->rxtx_lock); in handle_bar_write()
449 mdev_state->s[index].rxtx.count = 0; in handle_bar_write()
450 mdev_state->s[index].rxtx.head = 0; in handle_bar_write()
451 mdev_state->s[index].rxtx.tail = 0; in handle_bar_write()
453 mutex_unlock(&mdev_state->rxtx_lock); in handle_bar_write()
457 mdev_state->s[index].intr_trigger_level = 1; in handle_bar_write()
461 mdev_state->s[index].intr_trigger_level = 4; in handle_bar_write()
465 mdev_state->s[index].intr_trigger_level = 8; in handle_bar_write()
469 mdev_state->s[index].intr_trigger_level = 14; in handle_bar_write()
478 mdev_state->s[index].intr_trigger_level = 1; in handle_bar_write()
480 mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE; in handle_bar_write()
482 mdev_state->s[index].max_fifo_size = 1; in handle_bar_write()
483 mdev_state->s[index].intr_trigger_level = 1; in handle_bar_write()
490 mdev_state->s[index].dlab = true; in handle_bar_write()
491 mdev_state->s[index].divisor = 0; in handle_bar_write()
493 mdev_state->s[index].dlab = false; in handle_bar_write()
495 mdev_state->s[index].uart_reg[offset] = data; in handle_bar_write()
499 mdev_state->s[index].uart_reg[offset] = data; in handle_bar_write()
501 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) && in handle_bar_write()
506 mtty_trigger_interrupt(mdev_state); in handle_bar_write()
509 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) && in handle_bar_write()
514 mtty_trigger_interrupt(mdev_state); in handle_bar_write()
524 mdev_state->s[index].uart_reg[offset] = data; in handle_bar_write()
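handle_bar_write() (lines 364-524) emulates a 16550 in loopback: with DLAB set, registers 0/1 form the divisor latch; otherwise a byte written to the transmit register is pushed into the same port's receive FIFO under rxtx_lock, raising a receive-data interrupt once the FIFO reaches the FCR-selected trigger level (1/4/8/14 bytes) and flagging an overrun when it is full, while IER and MCR writes can raise transmit-empty and modem-status interrupts. The FIFO index macro used at line 386 is not shown by the listing; a plausible definition, assuming MAX_FIFO_SIZE is a power of two:

        /* assumed definition: advance a FIFO index with wrap-around */
        #define CIRCULAR_BUF_INC_IDX(idx)    ((idx) = ((idx) + 1) & (MAX_FIFO_SIZE - 1))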
532 static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state, in handle_bar_read() argument
539 if (mdev_state->s[index].dlab) { in handle_bar_read()
540 *buf = (u8)mdev_state->s[index].divisor; in handle_bar_read()
544 mutex_lock(&mdev_state->rxtx_lock); in handle_bar_read()
546 if (mdev_state->s[index].rxtx.head != in handle_bar_read()
547 mdev_state->s[index].rxtx.tail) { in handle_bar_read()
548 *buf = mdev_state->s[index].rxtx.fifo[ in handle_bar_read()
549 mdev_state->s[index].rxtx.tail]; in handle_bar_read()
550 mdev_state->s[index].rxtx.count--; in handle_bar_read()
551 CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail); in handle_bar_read()
554 if (mdev_state->s[index].rxtx.head == in handle_bar_read()
555 mdev_state->s[index].rxtx.tail) { in handle_bar_read()
563 if (mdev_state->s[index].uart_reg[UART_IER] & in handle_bar_read()
565 mtty_trigger_interrupt(mdev_state); in handle_bar_read()
567 mutex_unlock(&mdev_state->rxtx_lock); in handle_bar_read()
572 if (mdev_state->s[index].dlab) { in handle_bar_read()
573 *buf = (u8)(mdev_state->s[index].divisor >> 8); in handle_bar_read()
576 *buf = mdev_state->s[index].uart_reg[offset] & 0x0f; in handle_bar_read()
581 u8 ier = mdev_state->s[index].uart_reg[UART_IER]; in handle_bar_read()
584 mutex_lock(&mdev_state->rxtx_lock); in handle_bar_read()
586 if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun) in handle_bar_read()
591 (mdev_state->s[index].rxtx.count >= in handle_bar_read()
592 mdev_state->s[index].intr_trigger_level)) in handle_bar_read()
597 (mdev_state->s[index].rxtx.head == in handle_bar_read()
598 mdev_state->s[index].rxtx.tail)) in handle_bar_read()
603 (mdev_state->s[index].uart_reg[UART_MCR] & in handle_bar_read()
613 mutex_unlock(&mdev_state->rxtx_lock); in handle_bar_read()
619 *buf = mdev_state->s[index].uart_reg[offset]; in handle_bar_read()
626 mutex_lock(&mdev_state->rxtx_lock); in handle_bar_read()
628 if (mdev_state->s[index].rxtx.head != in handle_bar_read()
629 mdev_state->s[index].rxtx.tail) in handle_bar_read()
633 if (mdev_state->s[index].overrun) in handle_bar_read()
637 if (mdev_state->s[index].rxtx.head == in handle_bar_read()
638 mdev_state->s[index].rxtx.tail) in handle_bar_read()
641 mutex_unlock(&mdev_state->rxtx_lock); in handle_bar_read()
648 mutex_lock(&mdev_state->rxtx_lock); in handle_bar_read()
650 if (mdev_state->s[index].uart_reg[UART_MCR] & in handle_bar_read()
652 if (mdev_state->s[index].rxtx.count < in handle_bar_read()
653 mdev_state->s[index].max_fifo_size) in handle_bar_read()
657 mutex_unlock(&mdev_state->rxtx_lock); in handle_bar_read()
662 *buf = mdev_state->s[index].uart_reg[offset]; in handle_bar_read()
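On the read side (lines 532-662), handle_bar_read() pops received bytes off the FIFO, synthesizes the interrupt-identification bits from the same conditions that trigger interrupts, and computes the line and modem status purely from software state. The LSR case (lines 626-641) reads roughly as follows (local variable name and the UART_LSR_* composition are reconstructed):

        case UART_LSR:
                lsr = 0;
                mutex_lock(&mdev_state->rxtx_lock);
                /* data ready while the FIFO is non-empty */
                if (mdev_state->s[index].rxtx.head !=
                                mdev_state->s[index].rxtx.tail)
                        lsr |= UART_LSR_DR;
                /* overrun recorded by the write path */
                if (mdev_state->s[index].overrun)
                        lsr |= UART_LSR_OE;
                /* transmitter idle once the loopback FIFO has drained */
                if (mdev_state->s[index].rxtx.head ==
                                mdev_state->s[index].rxtx.tail)
                        lsr |= UART_LSR_TEMT | UART_LSR_THRE;
                mutex_unlock(&mdev_state->rxtx_lock);
                *buf = lsr;
                break;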
670 static void mdev_read_base(struct mdev_state *mdev_state) in mdev_read_base() argument
680 if (!mdev_state->region_info[index].size) in mdev_read_base()
683 start_lo = (*(u32 *)(mdev_state->vconfig + pos)) & in mdev_read_base()
685 mem_type = (*(u32 *)(mdev_state->vconfig + pos)) & in mdev_read_base()
690 start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4)); in mdev_read_base()
702 mdev_state->region_info[index].start = ((u64)start_hi << 32) | in mdev_read_base()
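mdev_read_base() (lines 670-702) walks the BARs of the emulated config space and caches each programmed base address in region_info[], combining the high dword for 64-bit memory BARs. A hedged reconstruction:

        static void mdev_read_base(struct mdev_state *mdev_state)
        {
                int index, pos = PCI_BASE_ADDRESS_0;
                u32 start_lo, start_hi, mem_type;

                for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {
                        if (!mdev_state->region_info[index].size)
                                continue;

                        start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
                                PCI_BASE_ADDRESS_MEM_MASK;
                        mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
                                PCI_BASE_ADDRESS_MEM_TYPE_MASK;

                        if (mem_type == PCI_BASE_ADDRESS_MEM_TYPE_64) {
                                start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
                                pos += 4;       /* 64-bit BAR consumes the next slot too */
                        } else {
                                start_hi = 0;   /* 1M and unknown types treated as 32-bit */
                        }

                        pos += 4;
                        mdev_state->region_info[index].start = ((u64)start_hi << 32) |
                                                                start_lo;
                }
        }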
707 static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count, in mdev_access() argument
717 mutex_lock(&mdev_state->ops_lock); in mdev_access()
730 handle_pci_cfg_write(mdev_state, offset, buf, count); in mdev_access()
732 memcpy(buf, (mdev_state->vconfig + offset), count); in mdev_access()
739 if (!mdev_state->region_info[index].start) in mdev_access()
740 mdev_read_base(mdev_state); in mdev_access()
748 *buf, mdev_state->s[index].dlab); in mdev_access()
750 handle_bar_write(index, mdev_state, offset, buf, count); in mdev_access()
752 handle_bar_read(index, mdev_state, offset, buf, count); in mdev_access()
758 *buf, mdev_state->s[index].dlab); in mdev_access()
772 mutex_unlock(&mdev_state->ops_lock); in mdev_access()
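mdev_access() (lines 707-772) is the single funnel for both config-space and BAR accesses: it takes ops_lock, dispatches on the VFIO region index encoded in the access offset, lazily caches BAR bases via mdev_read_base(), and then hands the access to the config or BAR handlers. Its core, sketched with the surrounding bookkeeping omitted (the index/offset decoding and local names are assumptions):

        mutex_lock(&mdev_state->ops_lock);

        switch (index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:
                if (is_write)
                        handle_pci_cfg_write(mdev_state, offset, buf, count);
                else
                        memcpy(buf, (mdev_state->vconfig + offset), count);
                break;
        case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
                if (!mdev_state->region_info[index].start)
                        mdev_read_base(mdev_state);

                if (is_write)
                        handle_bar_write(index, mdev_state, offset, buf, count);
                else
                        handle_bar_read(index, mdev_state, offset, buf, count);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        mutex_unlock(&mdev_state->ops_lock);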
777 static size_t mtty_data_size(struct mdev_state *mdev_state) in mtty_data_size() argument
780 (mdev_state->nr_ports * sizeof(struct serial_port)); in mtty_data_size()
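The migration payload (lines 777-780) is a fixed header followed by one struct serial_port per emulated port; a hedged sketch (struct mtty_data and its ports[] member are assumptions based on the fields referenced in the resume path):

        static size_t mtty_data_size(struct mdev_state *mdev_state)
        {
                /* fixed header plus the per-port UART state */
                return offsetof(struct mtty_data, ports) +
                       (mdev_state->nr_ports * sizeof(struct serial_port));
        }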
792 static void mtty_disable_files(struct mdev_state *mdev_state) in mtty_disable_files() argument
794 if (mdev_state->saving_migf) { in mtty_disable_files()
795 mtty_disable_file(mdev_state->saving_migf); in mtty_disable_files()
796 fput(mdev_state->saving_migf->filp); in mtty_disable_files()
797 mdev_state->saving_migf = NULL; in mtty_disable_files()
800 if (mdev_state->resuming_migf) { in mtty_disable_files()
801 mtty_disable_file(mdev_state->resuming_migf); in mtty_disable_files()
802 fput(mdev_state->resuming_migf->filp); in mtty_disable_files()
803 mdev_state->resuming_migf = NULL; in mtty_disable_files()
807 static void mtty_state_mutex_unlock(struct mdev_state *mdev_state) in mtty_state_mutex_unlock() argument
810 mutex_lock(&mdev_state->reset_mutex); in mtty_state_mutex_unlock()
811 if (mdev_state->deferred_reset) { in mtty_state_mutex_unlock()
812 mdev_state->deferred_reset = false; in mtty_state_mutex_unlock()
813 mutex_unlock(&mdev_state->reset_mutex); in mtty_state_mutex_unlock()
814 mdev_state->state = VFIO_DEVICE_STATE_RUNNING; in mtty_state_mutex_unlock()
815 mtty_disable_files(mdev_state); in mtty_state_mutex_unlock()
818 mutex_unlock(&mdev_state->state_mutex); in mtty_state_mutex_unlock()
819 mutex_unlock(&mdev_state->reset_mutex); in mtty_state_mutex_unlock()
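mtty_state_mutex_unlock() (lines 807-819) is where a deferred reset (see mtty_reset() at line 1401 below) actually takes effect: before dropping state_mutex it re-checks deferred_reset under reset_mutex, and if a reset was requested it forces the device back to RUNNING, drops the migration files, and checks again. Reconstructed; the retry label and goto are implied by the pattern rather than shown:

        static void mtty_state_mutex_unlock(struct mdev_state *mdev_state)
        {
        again:
                mutex_lock(&mdev_state->reset_mutex);
                if (mdev_state->deferred_reset) {
                        mdev_state->deferred_reset = false;
                        mutex_unlock(&mdev_state->reset_mutex);
                        mdev_state->state = VFIO_DEVICE_STATE_RUNNING;
                        mtty_disable_files(mdev_state);
                        goto again;
                }
                mutex_unlock(&mdev_state->state_mutex);
                mutex_unlock(&mdev_state->reset_mutex);
        }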
837 struct mdev_state *mdev_state = migf->mdev_state; in mtty_precopy_ioctl() local
853 mutex_lock(&mdev_state->state_mutex); in mtty_precopy_ioctl()
854 if (mdev_state->state != VFIO_DEVICE_STATE_PRE_COPY && in mtty_precopy_ioctl()
855 mdev_state->state != VFIO_DEVICE_STATE_PRE_COPY_P2P) { in mtty_precopy_ioctl()
880 mtty_state_mutex_unlock(mdev_state); in mtty_precopy_ioctl()
897 dev_dbg(migf->mdev_state->vdev.dev, "%s ask %zu\n", __func__, len); in mtty_save_read()
919 dev_dbg(migf->mdev_state->vdev.dev, "%s read %zu\n", __func__, ret); in mtty_save_read()
932 static void mtty_save_state(struct mdev_state *mdev_state) in mtty_save_state() argument
934 struct mtty_migration_file *migf = mdev_state->saving_migf; in mtty_save_state()
938 for (i = 0; i < mdev_state->nr_ports; i++) { in mtty_save_state()
940 &mdev_state->s[i], sizeof(struct serial_port)); in mtty_save_state()
943 dev_dbg(mdev_state->vdev.dev, in mtty_save_state()
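mtty_save_state() (lines 932-943) simply copies each port's serial_port structure into the saving migration file's buffer; a hedged sketch (the migf->data.ports[] destination and the filled_size accounting are partly inferred):

        static void mtty_save_state(struct mdev_state *mdev_state)
        {
                struct mtty_migration_file *migf = mdev_state->saving_migf;
                int i;

                for (i = 0; i < mdev_state->nr_ports; i++) {
                        memcpy(&migf->data.ports[i],
                               &mdev_state->s[i], sizeof(struct serial_port));
                        migf->filled_size += sizeof(struct serial_port);
                }
                dev_dbg(mdev_state->vdev.dev,
                        "%s filled to %zu\n", __func__, migf->filled_size);
        }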
948 static int mtty_load_state(struct mdev_state *mdev_state) in mtty_load_state() argument
950 struct mtty_migration_file *migf = mdev_state->resuming_migf; in mtty_load_state()
955 if (migf->filled_size < mtty_data_size(mdev_state)) { in mtty_load_state()
956 dev_dbg(mdev_state->vdev.dev, "%s expected %zu, got %zu\n", in mtty_load_state()
957 __func__, mtty_data_size(mdev_state), in mtty_load_state()
963 for (i = 0; i < mdev_state->nr_ports; i++) in mtty_load_state()
964 memcpy(&mdev_state->s[i], in mtty_load_state()
972 mtty_save_device_data(struct mdev_state *mdev_state, in mtty_save_device_data() argument
975 struct mtty_migration_file *migf = mdev_state->saving_migf; in mtty_save_device_data()
999 migf->mdev_state = mdev_state; in mtty_save_device_data()
1004 migf->data.nr_ports = mdev_state->nr_ports; in mtty_save_device_data()
1008 dev_dbg(mdev_state->vdev.dev, "%s filled header to %zu\n", in mtty_save_device_data()
1011 ret = mdev_state->saving_migf = migf; in mtty_save_device_data()
1015 mtty_save_state(mdev_state); in mtty_save_device_data()
1024 struct mdev_state *mdev_state = migf->mdev_state; in mtty_resume_write() local
1037 if (requested_length > mtty_data_size(mdev_state)) in mtty_resume_write()
1055 dev_dbg(migf->mdev_state->vdev.dev, "%s received %zu, total %zu\n", in mtty_resume_write()
1063 migf->data.nr_ports != mdev_state->nr_ports) { in mtty_resume_write()
1064 dev_dbg(migf->mdev_state->vdev.dev, in mtty_resume_write()
1068 dev_dbg(migf->mdev_state->vdev.dev, in mtty_resume_write()
1087 mtty_resume_device_data(struct mdev_state *mdev_state) in mtty_resume_device_data() argument
1106 migf->mdev_state = mdev_state; in mtty_resume_device_data()
1108 mdev_state->resuming_migf = migf; in mtty_resume_device_data()
1113 static struct file *mtty_step_state(struct mdev_state *mdev_state, in mtty_step_state() argument
1116 enum vfio_device_mig_state cur = mdev_state->state; in mtty_step_state()
1118 dev_dbg(mdev_state->vdev.dev, "%s: %d -> %d\n", __func__, cur, new); in mtty_step_state()
1158 ret = mtty_load_state(mdev_state); in mtty_step_state()
1161 mtty_disable_files(mdev_state); in mtty_step_state()
1171 mtty_disable_files(mdev_state); in mtty_step_state()
1194 migf = mtty_save_device_data(mdev_state, new); in mtty_step_state()
1210 migf = mtty_resume_device_data(mdev_state); in mtty_step_state()
1227 struct mdev_state *mdev_state = in mtty_set_state() local
1228 container_of(vdev, struct mdev_state, vdev); in mtty_set_state()
1233 mutex_lock(&mdev_state->state_mutex); in mtty_set_state()
1234 while (mdev_state->state != new_state) { in mtty_set_state()
1236 int rc = vfio_mig_get_next_state(vdev, mdev_state->state, in mtty_set_state()
1243 ret = mtty_step_state(mdev_state, next_state); in mtty_set_state()
1247 mdev_state->state = next_state; in mtty_set_state()
1255 mtty_state_mutex_unlock(mdev_state); in mtty_set_state()
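mtty_set_state() (lines 1227-1255) never jumps straight to the requested migration state; it loops, asking the VFIO core for the next intermediate state on the path and letting mtty_step_state() perform one arc at a time, then releases state_mutex through the deferred-reset-aware unlock helper. A hedged reconstruction of the loop (error handling abbreviated):

        mutex_lock(&mdev_state->state_mutex);
        while (mdev_state->state != new_state) {
                enum vfio_device_mig_state next_state;
                int rc = vfio_mig_get_next_state(vdev, mdev_state->state,
                                                 new_state, &next_state);
                if (rc) {
                        ret = ERR_PTR(rc);
                        break;
                }

                ret = mtty_step_state(mdev_state, next_state);
                if (IS_ERR(ret))
                        break;

                mdev_state->state = next_state;
        }
        mtty_state_mutex_unlock(mdev_state);    /* may also carry out a deferred reset */
        return ret;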
1262 struct mdev_state *mdev_state = in mtty_get_state() local
1263 container_of(vdev, struct mdev_state, vdev); in mtty_get_state()
1265 mutex_lock(&mdev_state->state_mutex); in mtty_get_state()
1266 *current_state = mdev_state->state; in mtty_get_state()
1267 mtty_state_mutex_unlock(mdev_state); in mtty_get_state()
1274 struct mdev_state *mdev_state = in mtty_get_data_size() local
1275 container_of(vdev, struct mdev_state, vdev); in mtty_get_data_size()
1277 *stop_copy_length = mtty_data_size(mdev_state); in mtty_get_data_size()
1314 struct mdev_state *mdev_state = in mtty_init_dev() local
1315 container_of(vdev, struct mdev_state, vdev); in mtty_init_dev()
1329 mdev_state->nr_ports = type->nr_ports; in mtty_init_dev()
1330 mdev_state->irq_index = -1; in mtty_init_dev()
1331 mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE; in mtty_init_dev()
1332 mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE; in mtty_init_dev()
1333 mutex_init(&mdev_state->rxtx_lock); in mtty_init_dev()
1335 mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL); in mtty_init_dev()
1336 if (!mdev_state->vconfig) { in mtty_init_dev()
1341 mutex_init(&mdev_state->ops_lock); in mtty_init_dev()
1342 mdev_state->mdev = mdev; in mtty_init_dev()
1343 mtty_create_config_space(mdev_state); in mtty_init_dev()
1345 mutex_init(&mdev_state->state_mutex); in mtty_init_dev()
1346 mutex_init(&mdev_state->reset_mutex); in mtty_init_dev()
1352 mdev_state->state = VFIO_DEVICE_STATE_RUNNING; in mtty_init_dev()
1363 struct mdev_state *mdev_state; in mtty_probe() local
1366 mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev, in mtty_probe()
1368 if (IS_ERR(mdev_state)) in mtty_probe()
1369 return PTR_ERR(mdev_state); in mtty_probe()
1371 ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev); in mtty_probe()
1374 dev_set_drvdata(&mdev->dev, mdev_state); in mtty_probe()
1378 vfio_put_device(&mdev_state->vdev); in mtty_probe()
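mtty_probe() (lines 1363-1378) follows the standard VFIO core pattern: allocate the device with the vfio_device embedded in mdev_state, register it as an emulated-IOMMU device, and stash the pointer as drvdata; on registration failure the reference is dropped again. Reconstructed (the ops structure name and the error label are assumptions):

        static int mtty_probe(struct mdev_device *mdev)
        {
                struct mdev_state *mdev_state;
                int ret;

                mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
                                               &mtty_dev_ops);      /* ops name assumed */
                if (IS_ERR(mdev_state))
                        return PTR_ERR(mdev_state);

                ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
                if (ret)
                        goto err_put_vdev;
                dev_set_drvdata(&mdev->dev, mdev_state);
                return 0;

        err_put_vdev:
                vfio_put_device(&mdev_state->vdev);
                return ret;
        }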
1384 struct mdev_state *mdev_state = in mtty_release_dev() local
1385 container_of(vdev, struct mdev_state, vdev); in mtty_release_dev()
1387 mutex_destroy(&mdev_state->reset_mutex); in mtty_release_dev()
1388 mutex_destroy(&mdev_state->state_mutex); in mtty_release_dev()
1389 atomic_add(mdev_state->nr_ports, &mdev_avail_ports); in mtty_release_dev()
1390 kfree(mdev_state->vconfig); in mtty_release_dev()
1395 struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); in mtty_remove() local
1397 vfio_unregister_group_dev(&mdev_state->vdev); in mtty_remove()
1398 vfio_put_device(&mdev_state->vdev); in mtty_remove()
1401 static int mtty_reset(struct mdev_state *mdev_state) in mtty_reset() argument
1405 mutex_lock(&mdev_state->reset_mutex); in mtty_reset()
1406 mdev_state->deferred_reset = true; in mtty_reset()
1407 if (!mutex_trylock(&mdev_state->state_mutex)) { in mtty_reset()
1408 mutex_unlock(&mdev_state->reset_mutex); in mtty_reset()
1411 mutex_unlock(&mdev_state->reset_mutex); in mtty_reset()
1412 mtty_state_mutex_unlock(mdev_state); in mtty_reset()
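mtty_reset() (lines 1401-1412) shows the other half of the deferred-reset scheme: the reset is always recorded under reset_mutex, and if state_mutex cannot be taken immediately (a migration operation owns it) the function just returns, leaving the holder of state_mutex to perform the reset when it calls mtty_state_mutex_unlock(). Reassembled (the early return is implied by the trylock):

        static int mtty_reset(struct mdev_state *mdev_state)
        {
                /* defer the reset if a migration-state transition is in flight */
                mutex_lock(&mdev_state->reset_mutex);
                mdev_state->deferred_reset = true;
                if (!mutex_trylock(&mdev_state->state_mutex)) {
                        mutex_unlock(&mdev_state->reset_mutex);
                        return 0;
                }
                mutex_unlock(&mdev_state->reset_mutex);
                mtty_state_mutex_unlock(mdev_state);
                return 0;
        }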
1420 struct mdev_state *mdev_state = in mtty_read() local
1421 container_of(vdev, struct mdev_state, vdev); in mtty_read()
1431 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), in mtty_read()
1443 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), in mtty_read()
1455 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), in mtty_read()
1481 struct mdev_state *mdev_state = in mtty_write() local
1482 container_of(vdev, struct mdev_state, vdev); in mtty_write()
1495 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), in mtty_write()
1507 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), in mtty_write()
1519 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), in mtty_write()
1537 static void mtty_disable_intx(struct mdev_state *mdev_state) in mtty_disable_intx() argument
1539 if (mdev_state->intx_evtfd) { in mtty_disable_intx()
1540 eventfd_ctx_put(mdev_state->intx_evtfd); in mtty_disable_intx()
1541 mdev_state->intx_evtfd = NULL; in mtty_disable_intx()
1542 mdev_state->intx_mask = false; in mtty_disable_intx()
1543 mdev_state->irq_index = -1; in mtty_disable_intx()
1547 static void mtty_disable_msi(struct mdev_state *mdev_state) in mtty_disable_msi() argument
1549 if (mdev_state->msi_evtfd) { in mtty_disable_msi()
1550 eventfd_ctx_put(mdev_state->msi_evtfd); in mtty_disable_msi()
1551 mdev_state->msi_evtfd = NULL; in mtty_disable_msi()
1552 mdev_state->irq_index = -1; in mtty_disable_msi()
1556 static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags, in mtty_set_irqs() argument
1562 mutex_lock(&mdev_state->ops_lock); in mtty_set_irqs()
1567 if (!is_intx(mdev_state) || start != 0 || count != 1) { in mtty_set_irqs()
1573 mdev_state->intx_mask = true; in mtty_set_irqs()
1578 mdev_state->intx_mask = true; in mtty_set_irqs()
1584 if (!is_intx(mdev_state) || start != 0 || count != 1) { in mtty_set_irqs()
1590 mdev_state->intx_mask = false; in mtty_set_irqs()
1595 mdev_state->intx_mask = false; in mtty_set_irqs()
1601 if (is_intx(mdev_state) && !count && in mtty_set_irqs()
1603 mtty_disable_intx(mdev_state); in mtty_set_irqs()
1607 if (!(is_intx(mdev_state) || is_noirq(mdev_state)) || in mtty_set_irqs()
1617 mtty_disable_intx(mdev_state); in mtty_set_irqs()
1627 mdev_state->intx_evtfd = evt; in mtty_set_irqs()
1628 mdev_state->irq_index = index; in mtty_set_irqs()
1632 if (!is_intx(mdev_state)) { in mtty_set_irqs()
1638 mtty_trigger_interrupt(mdev_state); in mtty_set_irqs()
1643 mtty_trigger_interrupt(mdev_state); in mtty_set_irqs()
1655 if (is_msi(mdev_state) && !count && in mtty_set_irqs()
1657 mtty_disable_msi(mdev_state); in mtty_set_irqs()
1661 if (!(is_msi(mdev_state) || is_noirq(mdev_state)) || in mtty_set_irqs()
1671 mtty_disable_msi(mdev_state); in mtty_set_irqs()
1681 mdev_state->msi_evtfd = evt; in mtty_set_irqs()
1682 mdev_state->irq_index = index; in mtty_set_irqs()
1686 if (!is_msi(mdev_state)) { in mtty_set_irqs()
1692 mtty_trigger_interrupt(mdev_state); in mtty_set_irqs()
1697 mtty_trigger_interrupt(mdev_state); in mtty_set_irqs()
1703 dev_dbg(mdev_state->vdev.dev, "%s: MSIX_IRQ\n", __func__); in mtty_set_irqs()
1707 dev_dbg(mdev_state->vdev.dev, "%s: ERR_IRQ\n", __func__); in mtty_set_irqs()
1711 dev_dbg(mdev_state->vdev.dev, "%s: REQ_IRQ\n", __func__); in mtty_set_irqs()
1716 mutex_unlock(&mdev_state->ops_lock); in mtty_set_irqs()
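In mtty_set_irqs() (lines 1556-1716) the ACTION_TRIGGER cases double as setup and teardown: a DATA_NONE payload with count == 0 disables the current interrupt, a DATA_EVENTFD payload installs a new eventfd context and records the active index, and DATA_NONE/DATA_BOOL payloads on an already-configured index fire a test interrupt. A hedged sketch of the INTx eventfd installation (local variable names are assumptions):

        if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
                int fd = *(int *)data;
                struct eventfd_ctx *evt;

                mtty_disable_intx(mdev_state);   /* drop any previous eventfd */

                if (fd < 0)
                        break;

                evt = eventfd_ctx_fdget(fd);
                if (IS_ERR(evt)) {
                        ret = PTR_ERR(evt);
                        break;
                }
                mdev_state->intx_evtfd = evt;
                mdev_state->irq_index = index;
                break;
        }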
1720 static int mtty_get_region_info(struct mdev_state *mdev_state, in mtty_get_region_info() argument
1731 mutex_lock(&mdev_state->ops_lock); in mtty_get_region_info()
1741 if (mdev_state->nr_ports == 2) in mtty_get_region_info()
1749 mdev_state->region_info[bar_index].size = size; in mtty_get_region_info()
1750 mdev_state->region_info[bar_index].vfio_offset = in mtty_get_region_info()
1757 mutex_unlock(&mdev_state->ops_lock); in mtty_get_region_info()
1791 struct mdev_state *mdev_state = in mtty_ioctl() local
1792 container_of(vdev, struct mdev_state, vdev); in mtty_ioctl()
1813 memcpy(&mdev_state->dev_info, &info, sizeof(info)); in mtty_ioctl()
1834 ret = mtty_get_region_info(mdev_state, &info, &cap_type_id, in mtty_ioctl()
1855 (info.index >= mdev_state->dev_info.num_irqs)) in mtty_ioctl()
1879 mdev_state->dev_info.num_irqs, in mtty_ioctl()
1892 ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start, in mtty_ioctl()
1899 return mtty_reset(mdev_state); in mtty_ioctl()
1937 struct mdev_state *mdev_state = in mtty_close() local
1938 container_of(vdev, struct mdev_state, vdev); in mtty_close()
1940 mtty_disable_files(mdev_state); in mtty_close()
1941 mtty_disable_intx(mdev_state); in mtty_close()
1942 mtty_disable_msi(mdev_state); in mtty_close()